
@InProceedings{QueirozCoMoBrJaMu:2010:GeFaGr,
               author = "Queiroz, Rossana B. and Cohen, Marcelo and Moreira, Juliano and 
                         Braun, Adriana and Jacques Junior, Julio C. and Musse, Soraia R.",
          affiliation = "{Pontif{\'{\i}}cia Universidade Cat{\'o}lica do Rio Grande do 
                         Sul} and {Pontif{\'{\i}}cia Universidade Cat{\'o}lica do Rio 
                         Grande do Sul} and {Pontif{\'{\i}}cia Universidade Cat{\'o}lica 
                         do Rio Grande do Sul} and {Pontif{\'{\i}}cia Universidade 
                         Cat{\'o}lica do Rio Grande do Sul} and {Pontif{\'{\i}}cia 
                         Universidade Cat{\'o}lica do Rio Grande do Sul} and 
                         {Pontif{\'{\i}}cia Universidade Cat{\'o}lica do Rio Grande do 
                         Sul}",
                title = "Generating Facial Ground Truth with Synthetic Faces",
            booktitle = "Proceedings...",
                 year = "2010",
               editor = "Bellon, Olga and Esperan{\c{c}}a, Claudio",
         organization = "Conference on Graphics, Patterns and Images, 23. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "Computer Vision, Ground Truth, Computer Animation.",
             abstract = "This work describes a methodology for generation of facial ground 
                         truth with synthetic faces. Our focus is to provide a way to 
                          generate accurate data for the evaluation of Computer Vision 
                          algorithms, in terms of detection of the face and its components. 
                          Such algorithms play a key role in many Computer Vision 
                          applications. We present a 
                         prototype in which we can generate facial animation videos using a 
                         3D face models database, controlling face actions, illumination 
                         conditions and camera position. The facial animation platform 
                         allows us to generate animations with speech, facial expressions 
                         and eye motion, in order to approach realistic human face 
                         behavior. In addition, our model provides the ground truth of a 
                          set of facial feature points at each frame. As a result, we are able 
                         to build a video database of synthetic human faces with ground 
                         truth, which can be used for training/evaluation of several 
                         algorithms for tracking and/or detection. We also present 
                         experiments using our generated videos to evaluate face, eye and 
                          mouth detection algorithms, comparing their performance with that 
                          obtained on real video sequences.",
  conference-location = "Gramado, RS, Brazil",
      conference-year = "30 Aug.-3 Sep. 2010",
                  doi = "10.1109/SIBGRAPI.2010.12",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2010.12",
             language = "en",
                  ibi = "8JMKD3MGPBW34M/3884JEB",
                  url = "http://urlib.net/ibi/8JMKD3MGPBW34M/3884JEB",
           targetfile = "Camera_Ready_70481.pdf",
        urlaccessdate = "2024, Apr. 28"
}
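
A minimal LaTeX usage sketch, assuming the entry above is saved to a local file (here called sibgrapi2010.bib, an illustrative name) and the document is compiled with pdflatex followed by bibtex:

    % sibgrapi2010.bib is an assumed filename; any .bib file containing the entry works.
    \documentclass{article}
    \begin{document}
    Synthetic faces have been used to generate facial ground
    truth~\cite{QueirozCoMoBrJaMu:2010:GeFaGr}.
    \bibliographystyle{ieeetr}
    \bibliography{sibgrapi2010}
    \end{document}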

